# I load the needed libraries
library(dplyr)      # data-manipulation verbs
library(scales)     # axis/label formatting helpers for plots
library(GoFKernel)  # kernel-based goodness-of-fit utilities
library(mvtnorm)    # multivariate-normal density/sampling (presumably used by the random_steps_mvtnorm* samplers)
library(gplots)     # extra plotting utilities
# NOTE(review): warn=-1 silences ALL warnings for the whole session,
# including real problems in the samplers; consider options(warn = 0)
# once the notebook is stable.
options(warn=-1)
Attaching package: ‘dplyr’
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
Loading required package: KernSmooth
KernSmooth 2.23 loaded
Copyright M. P. Wand 1997-2009
Attaching package: ‘gplots’
The following object is masked from ‘package:stats’:
lowess
I now load the helper functions defined in the class file:
source("../MCMC.R")
Then I define the target function whose distribution I want to sample with the MCMCs:
# Function to sample from: n-dim Gaussian with chosen sigmas and centers.
# For a point theta of length n it uses sigmas = (1, ..., n) and
# centers = (n, ..., 1) and returns the unnormalized density
#   prod_i exp(-(theta_i - center_i)^2 / sigma_i^2)
# computed as exp(-sum(...)): same value as the original product of
# exponentials, but vectorized (no scalar loop) and less prone to
# intermediate underflow for points far from the centers.
posterior_g_inhom = function (theta) {
    n = length(theta)
    sigmas = seq_len(n)        # component i has width i (seq_len is safe for n = 0)
    centers = rev(seq_len(n))  # component i is centered at n - i + 1
    exp(-sum((theta - centers)^2 / sigmas^2))
}
chosen_function = posterior_g_inhom
Then I only have to set the initialization parameters — the "hyperparameters" of the simulations.
# The initial parameters are:
init = c(4, 4, 4)          # starting point of every chain (3-dimensional target)
std = diag(1, 3)           # initial proposal covariance: 3x3 identity
N = as.integer(1e5)        # post-burn-in samples to keep
burn_in = as.integer(1e4)  # initial samples to discard
# N = as.integer(1e4)
# burn_in = as.integer(1e3)
print_step = as.integer(1e2)  # step passed to show_results (presumably a plotting/thinning interval — verify in ../MCMC.R)
# print_init = as.integer(1e3)
N_tot = N + burn_in        # total chain length requested from each sampler
# For Haario:
epsilon = 0.001  # small regularization term for the adaptive covariance (Haario-style AM)
# MVTNORM
# Evaluate then the MCMC: plain Metropolis with a multivariate-normal proposal
mcmc_g = random_steps_mvtnorm (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE)
# Selecting the sequence after the burn-in.
# FIX: the chain has N_tot = N + burn_in rows; the old slice burn_in:N
# kept row burn_in (still burn-in) and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
# Plotting the results
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 49.19091 %
# MVTNORM GIBBS
# Component-wise (Gibbs-style) Metropolis with multivariate-normal proposals
mcmc_g = random_steps_mvtnorm_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE)
# FIX: keep exactly the N post-burn-in rows of the N_tot-row chain;
# burn_in:N kept a burn-in row and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 59.9397 %
# # SIMPLE ADAPTIVE
# mcmc_g = random_steps_simple (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# # SIMPLE ADAPTIVE GIBBS
# mcmc_g = random_steps_simple_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# HAARIO
# Haario et al. adaptive Metropolis: covariance adapts after t_0 steps,
# regularized by eps to keep the proposal covariance positive definite.
mcmc_g = random_steps_haario (func_wanted = chosen_function, theta_init = init, n_samples = N_tot,
                              sigma = std, print_accept=TRUE, t_0 = burn_in, eps = epsilon)
# FIX: keep exactly the N post-burn-in rows of the N_tot-row chain;
# burn_in:N kept a burn-in row and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 13.38 %
Final mean = 3.010565 2.006442 1.006788
Final covariance matrix =
[,1] [,2] [,3]
[1,] 32.01761 20.815188 10.782943
[2,] 20.81519 17.382166 7.081716
[3,] 10.78294 7.081716 11.981753
# HAARIO GIBBS
# Component-wise variant of the Haario adaptive Metropolis sampler
mcmc_g = random_steps_haario_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot,
                                    sigma = std, print_accept=TRUE, t_0 = burn_in, eps = epsilon)
# FIX: keep exactly the N post-burn-in rows of the N_tot-row chain;
# burn_in:N kept a burn-in row and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 16.42121 %
Final mean = 2.998336 2.023568 1.038636
Final covariance matrix =
[,1] [,2] [,3]
[1,] 31.89463 20.603230 10.548884
[2,] 20.60323 16.939024 7.119524
[3,] 10.54888 7.119524 10.533057
# RAO
# Adaptive Metropolis (Rao-Blackwellized variant) with an exponentially
# decaying adaptation schedule (gamma_series_exp, halved every burn_in/2 steps)
mcmc_g = random_steps_AM_rao (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
                              gamma_function = gamma_series_exp, halved_step = burn_in/2)
# FIX: keep exactly the N post-burn-in rows of the N_tot-row chain;
# burn_in:N kept a burn-in row and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 49.14364 %
Final mean = 3.02395 1.99509 1.045245
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.51438743 0.01100274 -0.04794105
[2,] 0.01100274 1.96246714 -0.03121061
[3,] -0.04794105 -0.03121061 4.82883731
# RAO GIBBS
# Component-wise variant of the Rao-Blackwellized adaptive sampler
mcmc_g = random_steps_AM_rao_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
                                    gamma_function = gamma_series_exp, halved_step = burn_in/2)
# FIX: keep exactly the N post-burn-in rows of the N_tot-row chain;
# burn_in:N kept a burn-in row and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 61.61788 %
Final mean = 3.005943 1.990921 1.019102
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.603688743 0.02412376 -0.003530455
[2,] 0.024123759 1.92959584 -0.033227912
[3,] -0.003530455 -0.03322791 4.022010091
# GLOBAL
# Globally adaptive sampler (also adapts a global scale lambda — see the
# "Final lambda" line in its output)
mcmc_g = random_steps_global (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
                              gamma_function = gamma_series_exp, halved_step = burn_in)
# FIX: keep exactly the N post-burn-in rows of the N_tot-row chain;
# burn_in:N kept a burn-in row and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 34.59273 %
Final mean = 2.686614 2.675485 0.6791724
Final lambda = 4.16024
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.541368329 -0.004600986 0.8897246
[2,] -0.004600986 2.643012436 1.3650727
[3,] 0.889724559 1.365072660 8.3562215
# GLOBAL GIBBS
# Component-wise variant of the globally adaptive sampler
mcmc_g = random_steps_global_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
                                    gamma_function = gamma_series_exp, halved_step = burn_in)
# FIX: keep exactly the N post-burn-in rows of the N_tot-row chain;
# burn_in:N kept a burn-in row and dropped the last burn_in samples.
mcmc_g = mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 42.75879 %
Final mean = 2.973623 2.02435 0.854024
Final lambda = 0.848988
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.59121947 0.0519512 -0.04067516
[2,] 0.05195120 1.8400444 -0.14776614
[3,] -0.04067516 -0.1477661 3.68229156